idx = (__va - IOREMAP_VIRT_START) >> PAGE_SHIFT;
set_bit(idx, garbage);
}
+
+/*
+ * Translate a map-domain-page'd address to the underlying MFN.
+ *
+ * @va must be an address previously returned by map_domain_page() and
+ * still mapped.  Looks the MFN up via the linear pagetable mapping, so
+ * only the current address space's mapcache L1 entries are consulted.
+ */
+unsigned long domain_page_map_to_mfn(void *va)
+{
+ l1_pgentry_t *l1e;
+
+ /*
+  * Range check: NOTE(review) MAPCACHE_VIRT_END is an exclusive upper
+  * bound (START + size), so a valid mapping lies strictly below it —
+  * use '<', not '<='.
+  */
+ ASSERT( (((unsigned long) va) >= MAPCACHE_VIRT_START) &&
+ (((unsigned long) va) < MAPCACHE_VIRT_END) );
+ l1e = &__linear_l1_table[
+ l1_linear_offset((unsigned long) va)];
+ return l1e_get_pfn(*l1e);
+}
*/
void unmap_domain_page(const void *va);
+
+/*
+ * Given a VA from map_domain_page(), return the MFN it maps.  The VA
+ * must still be mapped (i.e. not yet passed to unmap_domain_page()).
+ */
+unsigned long domain_page_map_to_mfn(void *va);
+
/*
* Similar to the above calls, except the mapping is accessible in all
* address spaces (not just within the VCPU that created the mapping). Global
#define map_domain_page(mfn) mfn_to_virt(mfn)
#define __map_domain_page(pg) page_to_virt(pg)
#define unmap_domain_page(va) ((void)(va))
+#define domain_page_map_to_mfn(va) virt_to_mfn((unsigned long)(va))
#define map_domain_page_global(mfn) mfn_to_virt(mfn)
#define __map_domain_page_global(pg) page_to_virt(pg)